# Load libraries
import videoutils as vutils
import readutils as rutils
import plotutils as putils
import vedautils as veda
import statsutils as sutils
import datetime
import pandas as pd
import xarray as xr
import shutil

NASA Data Fusion Analysis of Derechos and Their Impact on Rural America
Story authors: Madison Wallner1, Andrew Blackford1, Udaysankar Nair1
1The University of Alabama in Huntsville (UAH)
Notebook editor: Kyle Lesinger1
Example scripts to process and visualize information
#For retrieving data already catalogued in VEDA STAC
STAC_API_URL="https://openveda.cloud/api/stac"
RASTER_API_URL = "https://openveda.cloud/api/raster"

What is a Derecho?
A derecho is a long-lasting, fast-moving windstorm that is produced from a line of severe thunderstorms. For a storm to be called a derecho, there must be:
- A concentrated area of severe wind reports over 400 miles (650 km) in length and at least 60 miles (96 km) wide.
- Several wind gusts reported over 75 mph (120 km/h).
- Sustained winds of at least 58 mph (93 km/h). (SPC)
Unlike single thunderstorms, derechos form within mesoscale (i.e., mid-sized) convective systems (MCS)—large storm clusters that produce strong, long-lasting straight-line winds.
Conditions that Help a Derecho Form Include:
- Strong instability (Convective Available Potential Energy (CAPE) over 2000–4000 J/kg): provides energy for strong updrafts and intense thunderstorms.
- High low-level moisture (dewpoints of 65–75 °F): keeps storms going by supplying moisture.
- Strong mid- and upper-level winds (wind shear over 40 knots): help organize storms and push them forward.
- A well-defined cold pool: rain-cooled air at the surface strengthens the storm by increasing wind speeds at the front of the system.
On May 12, 2022, a fast-moving serial derecho swept across Nebraska, the Dakotas, Iowa, and Minnesota, unleashing 100 mph winds, EF2 tornadoes, 2.5″ hail, and dust storms that devastated farms and destroyed corn and soybean fields.
The storm’s damage to silos, irrigation systems, power lines, and roads crippled grain elevators, processing plants, and delivery routes, triggering widespread economic hardship and food-supply disruptions.
Example: Retrieve GLDAS data from EarthData portal
Global Land Data Assimilation System (GLDAS) data helps assess soil moisture levels before and after the storm, showing how pre-existing drought conditions contributed to dust transport and how heavy rainfall may have impacted runoff and flooding.
Processing steps:
1.) Select a start and end date for the data granules
2.) Add the bucket name from EarthData portal
3.) Add the prefix for the data within the bucket (e.g., remote sensing mission, reanalysis model, etc.)
4.) Retrieve all files within the collection based on the start and end date. (If the same year is selected for start_date and end_date, then only the common year will be retrieved. Else all granules are retrieved.)
5.) Filter all collected granules based on the start_ and end_date range.
6.) Compute daily mean of hourly files
7.) Plot variable based on coordinates selected
8.) Create a .gif over the start_ and end_date range
Select dates and find Earthdata links
# Restrict the request to a single year of data
# (requires >3GB RAM if you select more than 40 dates).
start_date = datetime.datetime(2022, 4, 1)
end_date = datetime.datetime(2022, 5, 12)

# One entry per calendar day in the requested window (inclusive).
date_array = pd.date_range(start=start_date, end=end_date, freq='D').to_pydatetime()

bucket_name = "gesdisc-cumulus-prod-protected"
prefix = "GLDAS/GLDAS_NOAH025_3H.2.1"

# Avoid listing the whole archive: when the window sits inside a single
# year, narrow the S3 prefix to that year's folder.
if start_date.year == end_date.year:
    prefix = f'{prefix}/{start_date.year}'
#Download all links for either a specific year or for all years
all_links = rutils.list_files(bucket_name=bucket_name,
prefix = prefix,
region = "us-west-2",
file_extension = ".nc4")

Filter based on start_date and end_date
# Keep only the GLDAS granules whose embedded date stamp falls inside the
# requested window; the key segment looks like "A20220401", so the leading
# character is dropped before parsing.
filtered_keys = [
    key
    for key in all_links
    if start_date
    <= datetime.datetime.strptime(key.split('.')[-4][1:], "%Y%m%d")
    <= end_date
]
print(f"Filtered to {len(filtered_keys)} files between {start_date.strftime('%B')} {start_date.day} and {end_date.strftime('%B')} {end_date.day}, {end_date.year}.")
Filtered to 336 files between April 1 and May 12, 2022.
Compute daily mean (soil moisture data is within 3-hr increments)
# Variable of interest: volumetric soil moisture in the top 0-10 cm layer.
variable = 'SoilMoi0_10cm_inst'

# Stream each filtered granule from the EarthData bucket and collapse the
# 3-hourly fields into one daily-mean dataset per day (returned as a dict).
daily_mean_datasets = sutils.compute_daily_mean_from_keys_NETCDF(
    bucket_name=bucket_name,
    region_name="us-west-2",
    file_keys=filtered_keys,
    variable=variable
)
print(list(daily_mean_datasets.keys())[0])Processing days: 100%|██████████| 42/42 [05:27<00:00, 7.80s/day]
2022-04-01
Convert to a single xarray object
# The daily means live in a dict keyed by date; stack them into a single
# xarray object along a new 'time' dimension and attach the real dates.
soil = xr.concat(list(daily_mean_datasets.values()), dim='time')
soil['time'] = date_array
print(f"First 10 dates: {soil.time.values[0:10]}")First 10 dates: ['2022-04-01T00:00:00.000000000' '2022-04-02T00:00:00.000000000'
'2022-04-03T00:00:00.000000000' '2022-04-04T00:00:00.000000000'
'2022-04-05T00:00:00.000000000' '2022-04-06T00:00:00.000000000'
'2022-04-07T00:00:00.000000000' '2022-04-08T00:00:00.000000000'
'2022-04-09T00:00:00.000000000' '2022-04-10T00:00:00.000000000']
Create interactive geospatial folium plot
# Interactive folium map of the daily-mean soil moisture for one day.
# bbox appears to be [lon_min, lat_min, lon_max, lat_max] in EPSG:4326 —
# TODO confirm against putils.plot_folium_from_xarray.
putils.plot_folium_from_xarray(dataset=soil,
day_select='2022-05-11',
bbox=[-130, 33, -90, 50],
var_name_for_title='Soil Moisture from GLDAS (m³/m³) [subset]',
matplot_ramp = 'YlOrRd_r',
zoom_level = 5,
flipud=False,
save_tif=False,
tif_filename=None,
crs = 'EPSG4326',
opacity = 0.8)Create a gif over the date range
# Animate the daily soil-moisture maps over the full date range and save
# them as a GIF (duration presumably seconds per frame — verify against
# putils.matplotlib_gif).
putils.matplotlib_gif(data=soil,
bbox= [-104, 30, -90, 50],
gif_savename= "soil_matplotlib.gif",
duration=2,
cmap="YlOrRd_r")<IPython.core.display.Image object>
✅ Saved GIF → soil_matplotlib.gif
Example: Pull WLDAS soil moisture data from the VEDA STAC catalog and visualize.
The Western Land Data Assimilation System (WLDAS) is a regional instance of NASA’s Land Information System (LIS), developed at Goddard Space Flight Center for the western United States. It integrates meteorological forcings (precipitation, radiation, temperature, humidity, wind, surface pressure) and satellite-derived parameters (vegetation class, soil texture, elevation) into the Noah-MP land surface model using data assimilation techniques.
Soil moisture critically controls the partitioning of net surface energy into latent (evapotranspiration) versus sensible (heating) fluxes. Wetter soils favor latent heat, stabilizing the boundary layer, whereas drier soils boost sensible heating, enhancing near-surface temperature and convective available potential energy (CAPE). These processes govern where and when thunderstorms can initiate and organize.
Processing steps:
1.) Choose STAC catalog ID and date
2.) Retrieve collection information and items from VEDA STAC catalog
3.) Retrieve item statistics and tiling information
4.) Plot data
Choose variable and retrieve json
# Look up the WLDAS 0-10 cm soil-moisture collection in the VEDA STAC
# catalog, then fetch its items for the day of interest.
collection_soil = 'wldas-derecho-sm'
collection = veda.get_collection(STAC_API_URL, collection_soil)
print(f'Collection information: {collection}')
items = veda.search_stac_features(stac_api_url=STAC_API_URL,
collection_id=collection_soil,
date="2022-05-11",
limit=100)
print(f"\nNumber of collection items found: {len(items)}")
item = items[0] #only return the first item
# Unpack the collection's dashboard render settings (bands, rescale
# range, colormap) so the map below matches the VEDA story styling.
#Collection render information to display like the VEDA story
dashboard_rend, bands, asset_keys, vmin, vmax, cmap_name = veda.return_render_information(collection)Collection information: {'id': 'wldas-derecho-sm', 'type': 'Collection', 'links': [{'rel': 'items', 'type': 'application/geo+json', 'href': 'https://openveda.cloud/api/stac/collections/wldas-derecho-sm/items'}, {'rel': 'parent', 'type': 'application/json', 'href': 'https://openveda.cloud/api/stac/'}, {'rel': 'root', 'type': 'application/json', 'href': 'https://openveda.cloud/api/stac/'}, {'rel': 'self', 'type': 'application/json', 'href': 'https://openveda.cloud/api/stac/collections/wldas-derecho-sm'}, {'rel': 'http://www.opengis.net/def/rel/ogc/1.0/queryables', 'type': 'application/schema+json', 'title': 'Queryables', 'href': 'https://openveda.cloud/api/stac/collections/wldas-derecho-sm/queryables'}], 'title': 'WLDAS Soil Moisture Content (0-10cm)', 'assets': {'thumbnail': {'href': 'https://thumbnails.openveda.cloud/veda_derecho_thumbnail.png', 'type': 'image/png', 'roles': ['thumbnail'], 'title': 'Thumbnail', 'description': 'Photo by Jim Reed Photography (Intense derecho thunderstorm as it sweeps across farmland on September 23, 2018)\n'}}, 'extent': {'spatial': {'bbox': [[-179.1473680657398, 17.67439908061759, 179.7784301170628, 71.38922567703183]]}, 'temporal': {'interval': [['2022-05-11 00:00:00+00', '2022-05-11 23:59:59+00']]}}, 'license': 'CC0-1.0', 'renders': {'dashboard': {'bidx': [1], 'assets': ['cog_default'], 'rescale': [[0, 0.4]], 'resampling': 'nearest', 'colormap_name': 'rdylgn'}}, 'providers': [{'url': 'https://www.earthdata.nasa.gov/dashboard/', 'name': 'NASA VEDA', 'roles': ['host']}], 'summaries': {'datetime': ['2022-05-11T00:00:00Z']}, 'description': 'WLDAS data for the Western United States on May 11th, 2022.', 'item_assets': {'cog_default': {'type': 'image/tiff; application=geotiff; profile=cloud-optimized', 'roles': ['data', 'layer'], 'title': 'Default COG Layer', 'description': 'Cloud optimized default layer to display on map'}}, 
'stac_version': '1.0.0', 'datetime_range': 'day', 'stac_extensions': ['https://stac-extensions.github.io/render/v1.0.0/schema.json', 'https://stac-extensions.github.io/item-assets/v1.0.0/schema.json'], 'dashboard:is_periodic': False, 'dashboard:time_density': 'day'}
Number of collection items found: 1
Retrieve tiling information
# Request TileJSON metadata for the WLDAS item, rendered with the
# collection's dashboard settings plus a mild gamma boost on the red channel.
tile_request = dict(
    raster_api_url=RASTER_API_URL,
    collection_id=collection_soil,
    item=item,
    rescale=(vmin, vmax),
    assets=asset_keys[0],
    color_formula="gamma+r+1.05",
    colormap_name=cmap_name,
)
tiles = veda.get_raster_tilejson(**tile_request)
tiles{'tilejson': '2.2.0',
'version': '1.0.0',
'scheme': 'xyz',
'tiles': ['https://openveda.cloud/api/raster/collections/wldas-derecho-sm/items/WLDAScog_SM_2022-05-11/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?assets=cog_default&color_formula=gamma+r+1.05&colormap_name=rdylgn&rescale=0%2C0.4'],
'minzoom': 0,
'maxzoom': 24,
'bounds': [-179.1473680657398,
17.67439908061759,
179.7784301170628,
71.38922567703183],
'center': [0.3155310256614996, 44.53181237882471, 0]}
Plot data
# Display the WLDAS soil-moisture tiles on a folium map with a legend.
tiles_url = tiles["tiles"][0]  # first (and only) template in the TileJSON

# Centre the map on the midpoint of the lat/lon window of interest.
lat_min, lon_min, lat_max, lon_max = 30, -110, 50, -95
center = ((lat_min + lat_max) / 2, (lon_min + lon_max) / 2)

# The STAC render block stores the colormap name in lower case (e.g.
# 'rdylgn'); upper-casing every other character reconstructs the
# TitleCase spelling ('RdYlGn') — presumably the matplotlib name the
# legend helper expects; verify against plot_folium_from_STAC_with_legend.
legend_cmap = "".join(
    ch.upper() if pos % 2 == 0 else ch.lower()
    for pos, ch in enumerate(cmap_name)
)

m = putils.plot_folium_from_STAC_with_legend(
    tiles_url=tiles_url,
    vmin=vmin,
    vmax=vmax,
    center=center,
    zoom_start=5.5,
    width="100%",
    height="800px",
    attribution="VEDA",
    layer_name="WLDAS soil moisture",
    day_select=item['properties'].get('start_datetime'),
    crs="EPSG3857",
    opacity=0.6,
    colormap_name=legend_cmap,
)
# In a Jupyter notebook, simply display `m`:
mExample: Retrieve MERRA-2 hourly files for Aerosol Optical Thickness
Aerosol Optical Thickness (AOT), also called Aerosol Optical Depth (AOD), is a dimensionless measure of how much sunlight aerosols—tiny particles like dust, smoke or sea salt—scatter and absorb as it travels through a column of atmosphere. In practical terms, an AOT of 0.1 means only 10 % of the direct solar beam is extinguished by aerosols before reaching the surface.
The intense straight-line winds in derechos can uplift large quantities of soil and dust, dramatically increasing AOT downwind. Tracking AOT in near-real-time reveals the spatial extent and intensity of these airborne dust plumes.
Processing steps:
1.) Select a start and end date for the data granules
2.) Add the bucket name from EarthData portal
3.) Add the prefix for the data within the bucket (e.g., remote sensing mission, reanalysis model, etc.)
4.) Retrieve all files within the collection based on the start and end date. (If the same year is selected for start_date and end_date, then only the common year will be retrieved. Else all granules are retrieved.)
5.) Filter all collected granules based on the start_ and end_date range.
6.) Plot variable based on coordinates selected
7.) Create a .gif over the start_ and end_date range
Select and filter
# Restrict the MERRA-2 request to a single year of data.
start_date = datetime.datetime(2022, 5, 12)
end_date = datetime.datetime(2022, 5, 12)

# One entry per calendar day in the requested window (one day here).
date_array = pd.date_range(start=start_date, end=end_date, freq='D').to_pydatetime()

bucket_name = "gesdisc-cumulus-prod-protected"
prefix = "MERRA2/M2T1NXAER.5.12.4"

# Avoid listing the whole archive: when the window sits inside a single
# year, narrow the S3 prefix to that year's folder.
if start_date.year == end_date.year:
    prefix = f'{prefix}/{start_date.year}'
# List every .nc4 object key under the (possibly year-narrowed) MERRA-2
# prefix in the GES DISC bucket.
all_links = rutils.list_files(bucket_name=bucket_name,
prefix = prefix,
region = "us-west-2",
file_extension = ".nc4")
# Keep only the granules dated inside [start_date, end_date]. MERRA-2 keys
# end in ".YYYYMMDD.nc4", so the date is the second-to-last dot field
# (e.g. "MERRA2_400.tavg1_2d_aer_Nx.20220512.nc4" -> "20220512").
filtered_keys = []
for key in all_links:
    date_str = key.split('.')[-2]
    file_date = datetime.datetime.strptime(date_str, "%Y%m%d")
    if start_date <= file_date <= end_date:
        filtered_keys.append(key)
# Single quotes inside the replacement fields keep this valid on
# Python < 3.12 (quote reuse in f-strings needs PEP 701 / 3.12+).
print(f"Filtered to {len(filtered_keys)} files between {start_date.strftime('%B')} {start_date.day} and {end_date.strftime('%B')} {end_date.day}, {end_date.year}.")
Filtered to 1 files between May 12 and May 12, 2022.
variable = 'TOTEXTTAU' # Total aerosol extinction optical thickness (AOT/AOD)
# Load the selected variable from every filtered granule into a dict keyed
# by date; the hourly time steps are preserved (the dataset printed below
# still has 24 time steps — no daily averaging happens here).
merra_datasets = rutils.load_datasets_from_keys(
bucket_name = bucket_name,
region_name = "us-west-2",
file_keys=filtered_keys,
variable = variable,
data_source = 'MERRA'
)
print(list(merra_datasets.keys())[0])Loading daily datasets: 100%|██████████| 1/1 [00:08<00:00, 8.77s/day]
2022-05-12
# Currently each day is its own key
merra_datasets{datetime.date(2022, 5, 12): [<xarray.Dataset> Size: 20MB
Dimensions: (time: 24, lat: 361, lon: 576)
Coordinates:
* lon (lon) float64 5kB -180.0 -179.4 -178.8 ... 178.1 178.8 179.4
* lat (lat) float64 3kB -90.0 -89.5 -89.0 -88.5 ... 88.5 89.0 89.5 90.0
* time (time) datetime64[ns] 192B 2022-05-12T00:30:00 ... 2022-05-12T...
Data variables:
TOTEXTTAU (time, lat, lon) float32 20MB ...
Attributes: (12/30)
History: Original file generated: Sun May 22 22...
Comment: GMAO filename: d5124_m2_jan10.tavg1_2d...
Filename: MERRA2_400.tavg1_2d_aer_Nx.20220512.nc4
Conventions: CF-1
Institution: NASA Global Modeling and Assimilation ...
References: http://gmao.gsfc.nasa.gov
... ...
Contact: http://gmao.gsfc.nasa.gov
identifier_product_doi: 10.5067/KLICLTZ8EM9D
RangeBeginningDate: 2022-05-12
RangeBeginningTime: 00:00:00.000000
RangeEndingDate: 2022-05-12
RangeEndingTime: 23:59:59.000000]}
#Only grab the first object in the dictionary
single_date = merra_datasets.get(list(merra_datasets.keys())[0])[0]

Plot
# Animate the hourly AOT fields for 2022-05-12 over the dust-affected
# region and save them as a GIF.
putils.matplotlib_gif(data=single_date[variable],
bbox= [-110, 30, -90, 40],
gif_savename= "merra_aot_matplotlib.gif",
duration=2,
cmap="YlOrRd_r")<IPython.core.display.Image object>
✅ Saved GIF → merra_aot_matplotlib.gif
Retrieve and plot MODIS Aerosol Optical Depth
Example: Pull MODIS Aerosol Optical Depth data from the VEDA STAC catalog and visualize
# Look up the MODIS AOD collection in the VEDA STAC catalog and fetch
# its items for the day of the derecho.
collection_modis = 'modis-derecho'
date = "2022-05-12"
collection = veda.get_collection(STAC_API_URL, collection_modis)
collection
items = veda.search_stac_features(stac_api_url=STAC_API_URL,
collection_id=collection_modis,
date=date,
limit=100)
print(f"Number of collection items found: {len(items)}")
item = items[0] #only return the first itemNumber of collection items found: 1
# Unpack the collection's dashboard render settings, then request
# TileJSON metadata for the MODIS item styled to match the VEDA story.
#Collection render information to display like the VEDA story
dashboard_rend, bands, asset_keys, vmin, vmax, cmap_name = veda.return_render_information(collection)
tiles = veda.get_raster_tilejson(raster_api_url = RASTER_API_URL,
collection_id = collection_modis,
item = item,
rescale = (vmin, vmax),
assets = asset_keys[0],
color_formula = "gamma+r+1.05",
colormap_name = cmap_name
)
tiles{'tilejson': '2.2.0',
'version': '1.0.0',
'scheme': 'xyz',
'tiles': ['https://openveda.cloud/api/raster/collections/modis-derecho/items/derecho_MODIS_AOD_2022-05-12/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?assets=cog_default&color_formula=gamma+r+1.05&colormap_name=ylorbr&rescale=0%2C0.2'],
'minzoom': 0,
'maxzoom': 24,
'bounds': [-104.9587947491025,
39.60418504240213,
-87.8641172323396,
49.635984790222665],
'center': [-96.41145599072104, 44.62008491631239, 0]}
# Display the MODIS AOD tiles on a folium map with a legend.
tiles_url = tiles["tiles"][0]  # first template URL from the TileJSON

# Centre the map on the midpoint of the lat/lon window of interest.
lat_min, lon_min, lat_max, lon_max = 35, -100, 50, -85
center = ((lat_min + lat_max) / 2, (lon_min + lon_max) / 2)

# Alternate the case of the lower-case STAC colormap name (e.g. 'ylorbr'
# -> 'YlOrBr'), presumably to recover the TitleCase matplotlib spelling —
# verify against plot_folium_from_STAC_with_legend.
legend_cmap = "".join(
    ch.upper() if pos % 2 == 0 else ch.lower()
    for pos, ch in enumerate(cmap_name)
)

m = putils.plot_folium_from_STAC_with_legend(
    tiles_url=tiles_url,
    vmin=vmin,
    vmax=vmax,
    center=center,
    zoom_start=7,
    width="100%",
    height="800px",
    attribution="VEDA",
    layer_name="MODIS AOD",
    day_select=item['properties'].get('datetime'),
    crs="EPSG3857",
    opacity=0.7,
    colormap_name=legend_cmap,
)
# In a Jupyter notebook, simply display `m`:
mExample: Pull NCEI interpolated wind gusts data from the VEDA STAC catalog and visualize.
The NCEI Interpolated Wind Gusts product takes the discrete station‐reported peak wind gusts from the NCEI Storm Events Database—a standardized archive of severe‐weather observations dating back to 1950—and uses spatial interpolation to generate a continuous gridded field of maximum gust speeds across the derecho swath.
By filling in the gaps between point measurements, it reveals the full geographic extent and intensity gradients of the derecho’s outflow winds, often uncovering zones of extreme gust (e.g., 70 mph +) that lie between and beyond individual station sites.
# Look up the NCEI interpolated wind-gust collection in the VEDA STAC
# catalog and fetch its items for the day of the derecho.
collection_wind = 'windgusts-derecho'
date = "2022-05-12"
collection = veda.get_collection(STAC_API_URL, collection_wind)
collection
items = veda.search_stac_features(stac_api_url=STAC_API_URL,
collection_id=collection_wind,
date=date,
limit=100)
print(f"Number of collection items found: {len(items)}")
item = items[0] #only return the first itemNumber of collection items found: 1
# Unpack the collection's dashboard render settings, then request
# TileJSON metadata for the wind-gust item styled to match the VEDA story.
#Collection render information to display like the VEDA story
dashboard_rend, bands, asset_keys, vmin, vmax, cmap_name = veda.return_render_information(collection)
tiles = veda.get_raster_tilejson(raster_api_url = RASTER_API_URL,
collection_id = collection_wind,
item = item,
rescale = (vmin, vmax),
assets = asset_keys[0],
color_formula = "gamma+r+1.05",
colormap_name = cmap_name
)
tiles{'tilejson': '2.2.0',
'version': '1.0.0',
'scheme': 'xyz',
'tiles': ['https://openveda.cloud/api/raster/collections/windgusts-derecho/items/Windgusts_cog_2022-05-12/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?assets=cog_default&color_formula=gamma+r+1.05&colormap_name=bupu&rescale=75%2C100'],
'minzoom': 0,
'maxzoom': 24,
'bounds': [-104.98043829543191,
39.61453189361588,
-87.85370728055949,
49.62204006079752],
'center': [-96.41707278799569, 44.618285977206696, 0]}
# Display the interpolated wind-gust tiles on a folium map with a legend.
tiles_url = tiles["tiles"][0]  # first template URL from the TileJSON

# NOTE(review): this window is unused here — the centre is hard-coded below.
lat_min, lon_min, lat_max, lon_max = 35, -105, 50, -90
center = 43.8250, -97.7

# Alternate the case of the lower-case STAC colormap name (e.g. 'bupu'
# -> 'BuPu'), presumably to recover the TitleCase matplotlib spelling —
# verify against plot_folium_from_STAC_with_legend.
legend_cmap = "".join(
    ch.upper() if pos % 2 == 0 else ch.lower()
    for pos, ch in enumerate(cmap_name)
)

m = putils.plot_folium_from_STAC_with_legend(
    tiles_url=tiles_url,
    vmin=vmin,
    vmax=vmax,
    center=center,
    zoom_start=7,
    width="100%",
    height="800px",
    attribution="VEDA",
    layer_name="Windgusts",
    day_select=item['properties'].get('datetime'),
    crs="EPSG3857",
    opacity=0.7,
    colormap_name=legend_cmap,
)
# In a Jupyter notebook, simply display `m`:
mExample: Pull Black Marble Night Lights data from the VEDA STAC catalog and visualize.
During extreme windstorms such as derechos, sudden drops in nighttime brightness in the Black Marble imagery reveal where power infrastructure has failed and outages have occurred, while the re-illumination of areas over subsequent days tracks the pace and extent of electrical service restoration. This makes Black Marble a powerful, near-real-time proxy for assessing societal impacts and grid resilience in the storm’s wake.
# Look up the Black Marble night-lights collection in the VEDA STAC
# catalog; 2022-05-10 is presumably a pre-storm baseline date — confirm.
collection_nightlights = 'nightlights-derecho'
date = "2022-05-10"
collection = veda.get_collection(STAC_API_URL, collection_nightlights)
collection
items = veda.search_stac_features(stac_api_url=STAC_API_URL,
collection_id=collection_nightlights,
date=date,
limit=100)
print(f"Number of collection items found: {len(items)}")
item = items[0] #only return the first itemNumber of collection items found: 1
# Unpack the collection's dashboard render settings, then request
# TileJSON metadata for the night-lights item styled like the VEDA story.
#Collection render information to display like the VEDA story
dashboard_rend, bands, asset_keys, vmin, vmax, cmap_name = veda.return_render_information(collection)
tiles = veda.get_raster_tilejson(raster_api_url = RASTER_API_URL,
collection_id = collection_nightlights,
item = item,
rescale = (vmin,vmax),
assets = asset_keys[0],
color_formula = "gamma+r+1.05",
colormap_name = cmap_name
)
tiles{'tilejson': '2.2.0',
'version': '1.0.0',
'scheme': 'xyz',
'tiles': ['https://openveda.cloud/api/raster/collections/nightlights-derecho/items/Nightlightscog_2022-05-10/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?assets=cog_default&color_formula=gamma+r+1.05&colormap_name=bwr&rescale=-255%2C255'],
'minzoom': 0,
'maxzoom': 24,
'bounds': [-112.07926856247974,
39.47957304015183,
-88.2545230530161,
50.52441131325872],
'center': [-100.16689580774792, 45.00199217670527, 0]}
# Display the Black Marble night-lights tiles on a folium map. Unlike the
# other cells, 'bwr' is already a valid lower-case matplotlib colormap
# name, so it is passed through unchanged.
tiles_url = tiles["tiles"][0]  # first template URL from the TileJSON

# NOTE(review): this window is unused here — the centre is hard-coded below.
lat_min, lon_min, lat_max, lon_max = 35, -105, 50, -90
center = 43.55, -96.94

m = putils.plot_folium_from_STAC_with_legend(
    tiles_url=tiles_url,
    vmin=vmin,
    vmax=vmax,
    center=center,
    zoom_start=10,
    width="100%",
    height="800px",
    attribution="VEDA",
    layer_name="Black Marble Night Lights",
    day_select=item['properties'].get('datetime'),
    crs="EPSG3857",
    colormap_name=cmap_name,
    opacity=0.7,
)
# In a Jupyter notebook, simply display `m`:
mThe economic impact reached beyond the crop fields. Many grain silos, irrigation systems, and farm buildings were also damaged or destroyed, adding to the financial burden on farmers. See image below to identify crop types across the Midwest and the Storm Prediction Center reports across the Midwest.
Use the slider below to investigate the crop-land type and the extreme weather reports across the Midwest.
# Side-by-side slider comparing the crop-type map with the SPC storm
# reports across the Midwest (static images shipped with the notebook).
putils.plotly_dual_slider_window(image1_path="images/derecho/Derecho-Crop.jpg",
image2_path="images/derecho/Derecho-storm-reports.jpg",
target_width=800,
fig_width=800,
fig_height=600)Example: Pull Global Precipitation Measurement data through the Common Metadata Repository (CMR) STAC API.
NASA’s Global Precipitation Measurement Mission (GPM) uses satellites to measure Earth’s rain and snowfall for the benefit of mankind. Launched by NASA and JAXA on Feb. 27th, 2014, GPM is an international mission that sets the standard for spaceborne precipitation measurements.
# GENERATED USING https://dev-titiler-cmr.delta-backend.com/api.html#/TileJSON/tilejson_endpoint__tileMatrixSetId__tilejson_json_get
#Specific for May 12, 2022 (09:00 UTC)
tilejson_url = 'https://dev-titiler-cmr.delta-backend.com/tiles/WebMercatorQuad/{z}/{x}/{y}@1x?concept_id=C2723754864-GES_DISC&datetime=2022-05-12T09%3A00%3A00Z&variable=precipitation&backend=xarray&colormap_name=blues&rescale=0%2C48'
center= 43.55,-96.94
# Overlay the GPM IMERG precipitation tiles (served by titiler-cmr for
# 09:00 UTC on 2022-05-12) with a fixed 0-46 legend range.
m = putils.plot_folium_from_STAC_with_legend(
    tiles_url=tilejson_url,
    vmin=0,
    vmax=46,
    center=center,
    zoom_start=4,
    width="100%",
    height="800px",
    attribution="VEDA",
    layer_name="GPM Imerge Precipitation",
    day_select="2022-05-12",
    crs="EPSG3857",
    colormap_name="Blues",
    opacity=0.6,
)
mEnd of visualizations
# Clean up: delete every GIF this notebook wrote to the working directory
# so that re-runs start from a clean state.
# Remove created .gifs
import os
import glob
# find all .gif files in the current directory
for gif_path in glob.glob("*.gif"):
try:
os.remove(gif_path)
print(f"Removed {gif_path}")
# Report (rather than raise) if a file is locked or already gone.
except OSError as e:
print(f"Error removing {gif_path}: {e}")Removed soil_matplotlib.gif
Removed merra_aot_matplotlib.gif